from future import absolute_import
from future import division
from future import print_function
import numpy as np
import tensorflow as tf
import os
import loader as loader
from eval import eval

FLAGS.dataset = “sogou”
FLAGS.tf_checkpoint_dir
FLAGS.tf_checkpoint_dir = “model/“
FLAGS.model = “model.ckpt-44”
FLAGS.dict = “dict.sogou.100000”

batch_size = 64
interval=[4000,20000,40000,100004]
cluster_num = len(interval) - 1

def get_head_label(targetY, interval=(4000, 20000, 40000, 100004)):
    """Collapse word ids into adaptive-softmax-style head labels.

    Ids falling in bucket i, i.e. in [interval[i], interval[i+1]), are
    replaced by that bucket's head id, interval[0] + i.  Ids below
    interval[0] (the frequent "head" vocabulary) are left unchanged.

    Args:
        targetY: integer array of word ids.  NOTE(review): the mask is
            computed on `targetY` while the output is the flattened ids,
            so this assumes targetY is already 1-D — confirm with callers.
        interval: ascending bucket boundaries; defaults to the module-level
            `interval` values, so existing single-argument calls behave
            identically.

    Returns:
        1-D numpy int array of head labels, one per element of targetY.
    """
    n_clusters = len(interval) - 1
    head_labels = np.reshape(targetY, [-1])
    for i in range(n_clusters):
        in_bucket = np.logical_and(np.greater_equal(targetY, interval[i]),
                                   np.less(targetY, interval[i + 1]))
        # A scalar broadcasts in np.where; the original built an explicit
        # [value] * len list for the same effect.
        head_labels = np.where(in_bucket, interval[0] + i, head_labels)
    return head_labels

# Run the model over every batch, accumulating per-sentence names,
# losses, and scores.
e = eval()
result = e.loader.get_batch(batch_size)
all_names = []
all_losses = []
all_scores = []
while result:
    inputX_test, targetY_test, seqlen, names = result
    val = e.eval(inputX_test, targetY_test, seqlen)
    losses, scores = e.get_loss(val, seqlen)
    all_names.extend(names)
    # The final batch may be padded to batch_size; truncating to
    # len(names) drops the padding entries.
    all_losses.extend(losses[:len(names)])
    all_scores.extend(scores[:len(names)])
    result = e.loader.get_batch(batch_size)

#print('the loss is ')
#for sent , loss in zip(inputX_test,losses):
#    print(unicode(e.vocab.decode(sent),encoding='utf8').encode('utf8'))
#    print(loss)

# Dump one "name value" pair per line for losses and scores.
# NOTE(review): 'lm-sencond.txt' looks like a typo for 'lm-second.txt',
# but the filename is kept so downstream consumers are not broken.
with open('lm-sencond.txt', 'w') as out:  # renamed from `file` (shadowed builtin)
    for name, loss in zip(all_names, all_losses):
        out.write(name + ' ' + str(loss) + '\n')
with open('lm-second-score.txt', 'w') as out:
    for name, score in zip(all_names, all_scores):
        out.write(name + ' ' + str(score) + '\n')